Test Output

Console

Basic

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to configure console output for your tests
programmatically and via command line options.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style, StyleEnum


# Here is a sample test suite with failing / passing assertions and testcases.
# We can try out different console output styles to see
# how the test data gets printed.


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )

    @testcase
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


@testsuite
class BetaSuite:
    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# The most verbose representation: prints out full
# assertion details for passing & failing testcases.
all_details_a = Style(passing="assertion-detail", failing="assertion-detail")
all_details_b = Style(
    passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
)

# Terse representation, just prints out final result status, no details.
result_only_a = Style(passing="result", failing="result")
result_only_b = Style(passing=StyleEnum.RESULT, failing=StyleEnum.RESULT)

# It is generally good practice to have more details for failing tests:

# Descriptions / names for passing assertions
# All details for failing assertions
style_1_a = Style(passing="assertion", failing="assertion-detail")
style_1_b = Style(
    passing=StyleEnum.ASSERTION, failing=StyleEnum.ASSERTION_DETAIL
)

# Testcase names for passing testcases
# Assertion descriptions / names for failing assertions
style_2_a = Style(passing="testcase", failing="assertion")
style_2_b = Style(passing=StyleEnum.TESTCASE, failing=StyleEnum.ASSERTION)

# Suite names for passing suites
# Testcase names for failing testcases
style_3_a = Style(passing="testsuite", failing="testcase")
style_3_b = Style(passing=StyleEnum.TESTSUITE, failing=StyleEnum.TESTCASE)

# Multitest names for passing multitest instances
# Suite names for failing suites

style_4_a = Style(passing="test", failing="testsuite")
style_4_b = Style(passing=StyleEnum.TEST, failing=StyleEnum.TESTSUITE)


# In addition to the programmatic declarations above, we support limited
# console output styling options via the `--stdout-style` argument:

# `--stdout-style result-only`: Displays final test plan result only.
# `--stdout-style summary`: Test level pass/fail status.
# `--stdout-style extended-summary`: Assertion details for failing
#                                    tests, testcase names for passing ones.
# `--stdout-style detailed`: Assertion details of both passing/failing tests.


# Replace the `stdout_style` argument with the styles defined
# above to see how they change console output.


@test_plan(
    name="Command line output configuration example",
    stdout_style=all_details_a,
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
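
For reference, the named `--stdout-style` options correspond roughly to `Style` objects like the ones below. This is a sketch derived from the descriptions above (reusing the `Style` import from the example); the definitions shipped with Testplan are the source of truth.

# A rough, hypothetical mapping of the named --stdout-style options to
# Style objects, based on the descriptions above.
named_styles = {
    "result-only": Style(passing="result", failing="result"),
    "summary": Style(passing="test", failing="test"),
    "extended-summary": Style(passing="testcase", failing="assertion-detail"),
    "detailed": Style(passing="assertion-detail", failing="assertion-detail"),
}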

Multi-level

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how console output can be configured on different
levels (e.g. plan, multitest).
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )


@testsuite
class BetaSuite:
    @testcase
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


# In the example below, we have plan level configuration for console output,
# which will print out testcase names for passing tests and assertion details
# for failing ones.

# However, for `MultiTest('Secondary')` we also have a lower level
# configuration for console output, which will override the plan level
# config for that particular multitest.


@test_plan(
    name="Multi-level command line output configuration example",
    stdout_style=Style(passing="testcase", failing="assertion-detail"),
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(
        name="Secondary",
        suites=[BetaSuite()],
        # Just print out assertion names / descriptions but not the details
        stdout_style=Style(passing="assertion", failing="assertion"),
    )
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())

Exporters

Custom

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to implement a custom test report exporter and
how to integrate it with your test plan.
"""
import os
import sys
from typing import Dict, Optional

from testplan import test_plan
from testplan.common.exporters import ExportContext
from testplan.common.utils.logger import TESTPLAN_LOGGER
from testplan.exporters.testing import Exporter
from testplan.report import TestReport
from testplan.testing.multitest import MultiTest, testsuite, testcase


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )

    @testcase
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


@testsuite
class BetaSuite:
    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# To implement a basic test report exporter, just inherit from the base
# class `testplan.exporters.testing.Exporter`.


# Custom base class that will be used by the examples below
# Dumps the text content to the given file path.
class TextFileExporter(Exporter):
    def __init__(self, file_path):
        super().__init__()  # assumed: the base Exporter takes no required constructor arguments
        self.file_path = file_path

    def get_text_content(self, source):
        raise NotImplementedError

    def export(
        self,
        source: TestReport,
        export_context: ExportContext,
    ) -> Optional[Dict]:
        """
        Exports report to text file in the given directory.

        :param source: Testplan report to export
        :param export_context: information about other exporters
        :return: dictionary containing the possible output
        """

        with open(self.file_path, "w+") as report_file:
            report_file.write(self.get_text_content(source))
            TESTPLAN_LOGGER.user_info(
                "%s output generated at %s",
                self.__class__.__name__,
                self.file_path,
            )

        return {"text_file": self.file_path}


class ReprExporter(TextFileExporter):
    """Dumps the native representation of the test report to a text file."""

    def get_text_content(self, source):
        return repr(source)


class IndentedTextExporter(TextFileExporter):
    """
    Iterates over flattened test data and prints out an item in each line,
    indenting by their depth on the test report tree.
    """

    def get_text_content(self, source):
        # Reports have a utility method `flatten` that gives us a list of
        # items with their depths.
        report_data = source.flatten(depths=True)

        result = []
        for depth, item in report_data:
            # Skip assertion data
            if isinstance(item, dict):
                continue

            result.append(
                "{indent}{item} - {pass_label}".format(
                    indent=depth * " ",
                    item=item,
                    pass_label="Pass" if item.passed else "Fail",
                )
            )
        return os.linesep.join(result)


curr_dir = os.path.dirname(__file__)


# To programmatically enable exporters, just pass them as a list of items
# to the `exporters` argument of the `@test_plan` decorator.
@test_plan(
    name="Custom exporter example",
    exporters=[
        ReprExporter(file_path=os.path.join(curr_dir, "repr_report.txt")),
        IndentedTextExporter(
            file_path=os.path.join(curr_dir, "indented_report.txt")
        ),
    ],
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
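
Following the same pattern, an exporter does not have to write a file at all. The hypothetical sketch below only logs the overall plan status; it assumes the `Exporter` interface shown above and that report objects expose `name` and `passed` (as used in `IndentedTextExporter`).

class StatusExporter(Exporter):
    """Hypothetical exporter that only logs the overall plan status."""

    def export(self, source, export_context):
        # `source` is the top-level TestReport; `passed` reflects the plan result.
        status = "Pass" if source.passed else "Fail"
        TESTPLAN_LOGGER.user_info(
            "%s finished with status: %s", source.name, status
        )
        return {"status": status}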

JSON

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to generate a JSON report of test results.
"""
import os
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )

    @testcase
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


@testsuite
class BetaSuite:
    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# `@test_plan` accepts shortcut argument `json_path`
# for JSON reports, meaning that you don't have to instantiate a JSONExporter
# explicitly for basic JSON report generation.

# A JSON report can also be generated via command line arguments like:
# ./test_plan.py --json <report-path>

# <report-path> should be a valid system file path.

# If you want to test out command line configuration for JSON generation,
# use the --json argument directly, because command line arguments can
# override the programmatic declaration.


@test_plan(
    name="Basic JSON Report Example",
    json_path=os.path.join(os.path.dirname(__file__), "report.json"),
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
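
The `json_path` shortcut is equivalent to passing a JSON exporter explicitly via the `exporters` argument. A minimal sketch, assuming `JSONExporter` can be imported from `testplan.exporters.testing` and accepts a `json_path` option:

from testplan.exporters.testing import JSONExporter

@test_plan(
    name="Basic JSON Report Example",
    exporters=[
        JSONExporter(
            json_path=os.path.join(os.path.dirname(__file__), "report.json")
        )
    ],
)
def main(plan):
    ...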

json_to_pdf.py

PDF

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows:

* How to generate a PDF report of test results.

* How to configure the PDF report style programmatically and via command line.

"""
import os
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )

    @testcase
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


@testsuite
class BetaSuite:
    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# `@test_plan` accepts shortcut arguments `pdf_path` and `pdf_style`
# for PDF reports, meaning that you don't have to instantiate a PDFExporter
# explicitly for basic PDF report generation.

# A PDF report can also be generated via command line arguments like:
# ./test_plan.py --pdf <report-path> --pdf-style <report-style>

# <report-path> should be a valid system file path and <report-style> should be
# one of: `result-only`, `summary`, `extended-summary`, `detailed`.

# If you want to test out command line configuration for PDF generation,
# use the --pdf argument directly, because command line arguments can
# override the programmatic declaration.


@test_plan(
    name="Basic PDF Report Example",
    pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
    pdf_style=Style(passing="testcase", failing="assertion-detail"),
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
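
Similarly, the `pdf_path` and `pdf_style` shortcuts can be replaced by an explicit exporter. A minimal sketch, assuming `PDFExporter` can be imported from `testplan.exporters.testing` and accepts `pdf_path` and `pdf_style` options:

from testplan.exporters.testing import PDFExporter

@test_plan(
    name="Basic PDF Report Example",
    exporters=[
        PDFExporter(
            pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
            pdf_style=Style(passing="testcase", failing="assertion-detail"),
        )
    ],
)
def main(plan):
    ...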

Tagged Filtered PDF

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows:

* How to generate multiple PDF reports filtered by tags.

* How to configure the generated PDF report styles
  programmatically and via command line.
"""
import os
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite(tags="server")
class AlphaSuite:
    @testcase(tags={"color": "red"})
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")

    @testcase
    def test_membership_failing(self, env, result):
        result.contain(
            member=1,
            container={"foo": 1, "bar": 2},
            description="failing membership",
        )

    @testcase(tags={"color": "blue"})
    def test_regex_passing(self, env, result):
        result.regex.match(
            regexp="foo", value="foobar", description="passing regex match"
        )

    @testcase(tags={"color": ("red", "blue")})
    def test_regex_failing(self, env, result):
        result.regex.match(
            regexp="bar", value="foobaz", description="failing regex match"
        )


@testsuite(tags="client")
class BetaSuite:
    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase(tags={"color": "red"})
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# `@test_plan` accepts shortcut arguments `report_tags` and `report_tags_all`
# for Tag filtered PDF reports, meaning that you don't have to instantiate a
# TagFilteredPDFExporter explicitly.

# You can use `pdf_style` argument to apply common styling to all
# generated PDF reports.

# If you want to test out command line configuration for PDF generation,
# use the --report-tags, --report-tags-all, --report-dir and --pdf-style
# arguments directly, because command line arguments can override the
# programmatic declaration.

# An example command line call for tag filtered PDFs would be:
# ./test_plan.py --report-dir . --report-tags server color=red,blue
# --report-tags client color=red,blue --report-tags-all color=red,blue

# The command above will generate 3 PDFs, assuming
# the filtered test data is not empty.


@test_plan(
    name="Basic PDF Report Example",
    # Each item in the list corresponds to a PDF report
    report_tags=[
        "server",  # Report contains tests tagged with `server`
        "client",  # Report contains tests tagged with `client`
        # Report contains tests tagged with `color=red` OR `color=blue`
        {"color": ("red", "blue")},
    ],
    # Each item in the list corresponds to a PDF report
    report_tags_all=[
        # Report contains tests tagged with `server` AND `color=red`
        {"simple": "server", "color": "red"},
        # Report contains tests tagged with `color=red` AND `color=blue`
        {"color": ("red", "blue")},
    ],
    # All of the PDFs are going to be generated in this directory.
    report_dir=os.path.dirname(__file__),
    # This will be the common styling for all PDFs.
    pdf_style=Style(passing="testcase", failing="assertion-detail"),
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
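
The `report_tags`, `report_tags_all`, `report_dir` and `pdf_style` shortcuts map onto a tag filtered PDF exporter. The sketch below is an assumption about the explicit form, using `TagFilteredPDFExporter` from `testplan.exporters.testing` with the same option names as the shortcuts; check the exporter's documentation before relying on it.

from testplan.exporters.testing import TagFilteredPDFExporter

# Hypothetical explicit equivalent of the shortcut arguments above.
exporter = TagFilteredPDFExporter(
    report_dir=os.path.dirname(__file__),
    pdf_style=Style(passing="testcase", failing="assertion-detail"),
    report_tags=["server", "client", {"color": ("red", "blue")}],
    report_tags_all=[{"simple": "server", "color": "red"}],
)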

Cross Page Assertions

A few of the assertions are enhanced to render large content that does not fit into a single page. This example demonstrates those that are currently available.

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example generates a sample pdf that contains assertions that could cross
multiple pages.
"""
import os
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class AlphaSuite:

    msg = (
        "This is a super looooooooooog message with indents, extra spaces\n"
        "    and <Test>special</Test> characters,\n"
        "    and    it    will    be    written    as-is    in    pdf.\n" * 40
    )

    @testcase
    def test_log_assertions(self, env, result):
        result.log(self.msg, description="Logging a long msg")

    @testcase
    def test_regex_assertions(self, env, result):

        result.regex.match(
            regexp=".*super",
            value=self.msg,
            description="Single line match expect to pass",
        )
        result.regex.multiline_match(
            regexp=".*super",
            value=self.msg,
            description="Multiline match expect to pass",
        )
        result.regex.not_match(
            regexp=".*super",
            value=self.msg,
            description="Not_match expect to fail",
        )
        result.regex.search(
            regexp="super",
            value=self.msg,
            description="Search - highlights the first occurrence",
        )
        result.regex.findall(
            regexp="super",
            value=self.msg,
            description="Findall - highlights all occurrences",
        )
        result.regex.matchline(
            regexp=".*super", value=self.msg, description="Line-by-line match"
        )


# `@test_plan` accepts shortcut arguments `pdf_path` and `pdf_style`
# for PDF reports, meaning that you don't have to instantiate a PDFExporter
# explicitly for basic PDF report generation.

# A PDF report can also be generated via command line arguments like:
# ./test_plan.py --pdf <report-path> --pdf-style <report-style>

# <report-path> should be a valid system file path and <report-style> should be
# one of: `result-only`, `summary`, `extended-summary`, `detailed`.

# If you want to test out command line configuration for PDF generation,
# use the --pdf argument directly, because command line arguments can
# override the programmatic declaration.


@test_plan(
    name="Basic PDF Report Example",
    pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
    pdf_style=Style(passing="assertion-detail", failing="assertion-detail"),
)
def main(plan):

    plan.add(MultiTest(name="Primary", suites=[AlphaSuite()]))


if __name__ == "__main__":
    sys.exit(not main())

XML

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""This example shows how to generate XML reports in JUnit format."""
import os
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import test_plan


@testsuite
class AlphaSuite:
    @testcase
    def test_equality_passing(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def test_equality_failing(self, env, result):
        result.equal(2, 1, description="failing equality")

    @testcase
    def test_membership_passing(self, env, result):
        result.contain(1, [1, 2, 3], description="passing membership")


@testsuite
class BetaSuite:
    @testcase
    def test_error(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# `@test_plan` accepts shortcut argument `xml_dir` for XML output, meaning
# you don't have to instantiate an XMLExporter explicitly for basic XML
# report generation.

# XML reports can also be generated via command line arguments like:
# ./test_plan.py --xml <xml-directory>

# <xml-directory> should be a valid system directory; if this directory
# already exists, it will be removed and recreated.

# If you want to test out command line configuration for XML generation,
# use the --xml argument directly, because command line arguments can
# override the programmatic declaration.


@test_plan(
    name="Basic XML Report Example",
    xml_dir=os.path.join(os.path.dirname(__file__), "xml"),
)
def main(plan):
    multi_test_1 = MultiTest(name="Primary", suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name="Secondary", suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())
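
The `xml_dir` shortcut likewise corresponds to an explicit exporter. A minimal sketch, assuming `XMLExporter` can be imported from `testplan.exporters.testing` and accepts an `xml_dir` option:

from testplan.exporters.testing import XMLExporter

@test_plan(
    name="Basic XML Report Example",
    exporters=[
        XMLExporter(xml_dir=os.path.join(os.path.dirname(__file__), "xml"))
    ],
)
def main(plan):
    ...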